Ubuntu 16.04

Ceph : Use as File System
2016/04/23
 
Configure Clients to use Ceph Storage as follows.
                                         |
        +--------------------+           |           +-------------------+
        |   [dlp.srv.world]  |10.0.0.30  |   10.0.0.x|   [   Client  ]   |
        |    Ceph-Deploy     +-----------+-----------+                   |
        |                    |           |           |                   |
        +--------------------+           |           +-------------------+
            +----------------------------+----------------------------+
            |                            |                            |
            |10.0.0.51                   |10.0.0.52                   |10.0.0.53 
+-----------+-----------+    +-----------+-----------+    +-----------+-----------+
|   [node01.srv.world]  |    |   [node02.srv.world]  |    |   [node03.srv.world]  |
|     Object Storage    +----+     Object Storage    +----+     Object Storage    |
|     Monitor Daemon    |    |                       |    |                       |
|                       |    |                       |    |                       |
+-----------------------+    +-----------------------+    +-----------------------+

 
As an example, mount CephFS as a filesystem on a Client.
[1] Create an MDS (MetaData Server) on the Node you'd like to use as the MDS. This example uses node01.
ubuntu@dlp:~/ceph$
ceph-deploy mds create node01
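
If you want to confirm that the MDS daemon actually started on node01, a check like the following can help (a minimal sketch; the unit name ceph-mds@node01 assumes the default systemd naming used by ceph-deploy on Ubuntu 16.04).
# check the MDS daemon on node01 (unit name is an assumption)

ubuntu@node01:~$
sudo systemctl status ceph-mds@node01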

[2] Create at least 2 RADOS pools on the MDS Node and activate the MetaData Server.
For the pg_num value specified at the end of each create command, refer to the official documentation below and choose an appropriate value.
⇒ http://docs.ceph.com/docs/master/rados/operations/placement-groups/
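
As a rough rule of thumb from that document, total PGs ≈ (number of OSDs × 100) / pool replica size, rounded up to the nearest power of two. A quick check for this 3-OSD setup, assuming the default replica size of 3 (these numbers are assumptions, not read from the cluster):
# (3 OSDs * 100) / replica size 3 = 100  ->  next power of two is 128

ubuntu@node01:~$
python -c "import math; osds=3; size=3; print(2**int(math.ceil(math.log(osds*100/size, 2))))"

128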
# create pools

ubuntu@node01:~$
sudo ceph osd pool create cephfs_data 128

pool 'cephfs_data' created
ubuntu@node01:~$
sudo ceph osd pool create cephfs_metadata 128

pool 'cephfs_metadata' created
# create the filesystem on the pools

ubuntu@node01:~$
sudo ceph fs new cephfs cephfs_metadata cephfs_data

new fs with metadata pool 2 and data pool 1
# show status

ubuntu@node01:~$
sudo ceph fs ls

name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
ubuntu@node01:~$
sudo ceph mds stat

e5: 1/1/1 up {0=node01=up:active}
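It is also possible to check the cluster as a whole; ceph -s summarizes health, monitors, OSDs and the MDS map in one view (output varies per cluster, so it is omitted here).
# show overall cluster status

ubuntu@node01:~$
sudo ceph -s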
[3] Mount CephFS on a Client.
root@client:~#
apt-get -y install ceph-fs-common ceph-fuse
# get admin key

root@client:~#
ssh ubuntu@node01.srv.world "sudo ceph-authtool -p /etc/ceph/ceph.client.admin.keyring" > admin.key

ubuntu@node01.srv.world's password:
root@client:~#
chmod 600 admin.key
root@client:~#
mount -t ceph node01.srv.world:6789:/ /mnt -o name=admin,secretfile=admin.key

root@client:~#
df -hT

Filesystem                  Type      Size  Used Avail Use% Mounted on
udev                        devtmpfs  2.0G     0  2.0G   0% /dev
tmpfs                       tmpfs     396M  5.6M  390M   2% /run
/dev/mapper/ubuntu--vg-root ext4       25G  1.6G   23G   7% /
tmpfs                       tmpfs     2.0G     0  2.0G   0% /dev/shm
tmpfs                       tmpfs     5.0M     0  5.0M   0% /run/lock
tmpfs                       tmpfs     2.0G     0  2.0G   0% /sys/fs/cgroup
/dev/vda1                   ext2      472M   55M  393M  13% /boot
tmpfs                       tmpfs     100K     0  100K   0% /run/lxcfs/controllers
tmpfs                       tmpfs     396M     0  396M   0% /run/user/0
10.0.0.51:6789:/            ceph       75G   24G   52G  32% /mnt
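
To make the mount persistent across reboots, an entry like the following can be added to /etc/fstab (a sketch: the key path /root/admin.key assumes admin.key was saved in root's home directory as above; _netdev delays mounting until the network is up).
# /etc/fstab entry (key path is an assumption)

node01.srv.world:6789:/   /mnt   ceph   name=admin,secretfile=/root/admin.key,noatime,_netdev   0 0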
 